To copy the code, click the button in the upper right corner of the code-chunks.
rm(list = ls())
gc()
This script defines the following helper functions:

- fpackage.check: check if packages are installed (and install if not) in R
- fsave: save data with a time stamp in the correct directory
- fload: load R-objects under new names
- fshowdf: print objects (tibble / data.frame) nicely on screen in .Rmd
- fplot_graph: visualize the network topology and agent
positions (degree-trait correlation)fpackage.check <- function(packages) {
    # Install-if-missing loader: for each package name, try to attach it with
    # require(); if that fails, install it (with dependencies) and attach it
    # via library(). Returns (invisibly, via lapply) a list of logicals.
    # NOTE(review): the prose fragment before the function name on the first
    # line is knitting residue from the preceding text block.
    lapply(packages, FUN = function(x) {
        if (!require(x, character.only = TRUE)) {
            install.packages(x, dependencies = TRUE)
            library(x, character.only = TRUE)
        }
    })
}
# Save an R object to disk with a date stamp (YYYYMMDD) prefixed to the file
# name, creating the target directory (recursively) if it does not exist.
# NOTE: save() stores the object under the name "x"; use fload() to retrieve
# it under a new name.
# - x: object to save
# - file: file name, e.g. "networks.Rda"
# - location: target directory (must end in "/"; created if absent)
# - ...: reserved for future extensions (currently unused)
# Returns the full path of the saved file, invisibly.
fsave <- function(x, file, location = "./data/processed/", ...) {
    if (!dir.exists(location))
        dir.create(location, recursive = TRUE)  # also create missing parent dirs
    datename <- substr(gsub("[:-]", "", Sys.time()), 1, 8)  # YYYYMMDD
    totalname <- paste0(location, datename, file)
    print(paste0("SAVED: ", totalname))
    save(x, file = totalname)
    invisible(totalname)
}
# Load a single R object from an .Rda/.RData file and return it, so the caller
# can assign it to any name (save() files restore objects under their original
# names; fsave() always stores the object as 'x').
# - fileName: path to a file written by save()/fsave()
# Returns the (first) object stored in the file.
fload <- function(fileName) {
    # load into a private environment so nothing in the caller is clobbered
    env <- new.env(parent = emptyenv())
    loaded <- load(fileName, envir = env)
    if (length(loaded) == 0)
        stop("No objects found in file: ", fileName)
    env[[loaded[1]]]
}
# Pretty-print a tibble/data.frame in a knitted .Rmd: renders an HTML table
# with striped/hover styling inside a 300px scrollable box.
# - x: object to display
# - ...: forwarded to knitr::kable()
fshowdf <- function(x, ...) {
    html_tbl <- knitr::kable(x, digits = 2, "html", ...)
    styled_tbl <- kableExtra::kable_styling(html_tbl, bootstrap_options = c("striped", "hover"))
    kableExtra::scroll_box(styled_tbl, width = "100%", height = "300px")
}
# Visualize network topology and agent positions: node size scales with
# degree, node colour encodes the agent's role, and a legend maps colours
# to roles.
# - graph: igraph object with a vertex attribute 'role'
# - main: plot title
# - layout_algo: optional igraph layout, passed straight to plot()
# - col1 / col2: colours for trendsetters / conformists
fplot_graph <- function(graph, main=NULL, layout_algo=NULL,
                        col1 = "#FFD700", col2 = "#800080") {
    node_sizes <- degree(graph) + 2  # node size based on degree
    node_cols <- ifelse(V(graph)$role == "trendsetter", col1, col2)
    plot(graph,
         main = main,
         layout = layout_algo,
         vertex.label = NA,
         vertex.size = node_sizes,
         vertex.color = node_cols,
         edge.width = 0.5,
         edge.color = "darkgrey")
    # legend mapping the two colours to the two roles
    role_cols <- c(col1, col2)
    legend("bottomleft",
           legend = c("Trendsetter", "Conformist"),
           pch = 21,
           col = role_cols,
           pt.bg = role_cols,
           pt.cex = 3,
           bty = "n")
}
# Load (and, if necessary, first install) all required packages.
packages <- c("tidyverse", "igraph", "vioplot", "ggplot2", "ggpubr", "plotly",
              "parallel", "foreach", "doParallel")
invisible(fpackage.check(packages))
rm(packages)
A function to calculate the payoff of a specific choice, for an agent (id), given the agents’ current statuses, the network structure, and the utility function parameters:
# Expected utility of a given choice for one agent, given its neighbors'
# statuses from the prior round and the utility-function parameters.
# - agent_id: row index of the focal agent in 'agents'
# - choice: candidate action (0 = do not follow trend, 1 = follow trend)
# - agents: data.frame with at least columns id, role, choice
# - network: igraph object linking the agents
# - params: list with payoffs s (conformist status-quo), e (trendsetter
#   adoption) and coordination weights w (choice 0) / z (choice 1)
# Returns a list: utility, plus neighbor counts n1 (adopters) / n0 (others).
futility <- function(agent_id, choice, agents, network, params) {
    # get ego and his local neighborhood
    ego <- agents[agent_id, ]
    nbrs <- neighbors(network, ego$id)  # renamed: don't shadow igraph::neighbors
    alters <- agents[as.numeric(nbrs), ]
    n <- nrow(alters)
    # count number of neighbors who did (not) follow the trend
    n1 <- sum(alters$choice == 1)
    n0 <- sum(alters$choice == 0)
    # calculate expected utility (depending on alters' choices in prior round)
    if (ego$role == "conformist") {
        # conformists earn s for the status quo (choice 0) ...
        choice_payoff <- if (choice == 0) params$s else 0
        # ... plus a coordination payoff proportional to the share of matching
        # neighbors; guard n == 0 (isolated node) to avoid NaN from 0/0
        if (n == 0) {
            coordination_payoff <- 0
        } else if (choice == 0) {
            coordination_payoff <- (params$w/n) * n0  # normalized by n
        } else {
            coordination_payoff <- (params$z/n) * n1
        }
    } else {
        # trendsetters care only about adopting the trend themselves
        choice_payoff <- if (choice == 1) params$e else 0
        coordination_payoff <- 0
    }
    return(list(utility = choice_payoff + coordination_payoff,
        n1 = n1, n0 = n0))
}
A function to generate a degree sequence based on a power-law / log-normal distribution:
# Generate a degree sequence of length n following either a power-law
# (p(k) ∝ k^{-alpha}) or a log-normal distribution; the log-normal variant is
# scaled to have (approximately) the same mean degree <k> as its power-law
# counterpart. The degree sum is forced even (required by the configuration
# model).
# - n: number of nodes
# - dist: "power-law" or "log-normal"
# - alpha: power-law exponent
# - k_min / k_max: degree bounds
# - seed: optional RNG seed for reproducibility
fdegseq <- function(n, dist = "power-law", alpha, k_min = 1, k_max = n - 1, seed = NULL) {
    # validate the distribution type up front, before any sampling is done
    if (!dist %in% c("power-law", "log-normal")) {
        stop("Invalid distribution type. Please choose 'power-law' or 'log-normal'.")
    }
    if (!is.null(seed)) {
        set.seed(seed)
    }
    # helper: bump the first degree by one if the sum is odd (configuration
    # model needs an even total)
    fmake_even <- function(ds) {
        if (sum(ds)%%2 != 0) {
            ds[1] <- ds[1] + 1
        }
        ds
    }
    # power-law probabilities p(k) ∝ k^{-alpha}, normalized
    probs <- (1/(k_min:k_max))^alpha
    probs <- probs/sum(probs)
    # sample the power-law degree sequence (also the reference for the
    # log-normal mean degree); fix parity for the configuration model
    degseq <- fmake_even(sample(k_min:k_max, size = n, replace = TRUE, prob = probs))
    if (dist == "power-law") {
        return(degseq)
    }
    # log-normal sequence with the same mean degree <k> as its 'scale-free'
    # counterpart
    mean_deg <- mean(degseq)
    raw_degseq <- rlnorm(n = n, meanlog = log(mean_deg), sdlog = 1)
    # re-scale the raw draw to match the target mean degree
    scaled_degseq <- raw_degseq * (mean_deg/mean(raw_degseq))
    # apply bounds [k_min, k_max] and round to integer values
    bounded_degseq <- pmin(pmax(round(scaled_degseq), k_min), k_max)
    # rounding/bounding shifts the mean, so re-scale once more before parity fix
    scaling_fctr2 <- mean_deg/mean(bounded_degseq)
    fmake_even(pmin(pmax(round(bounded_degseq * scaling_fctr2), k_min), k_max))
}
# ---- global model parameters + demo plot of both distribution types ----
n = 96 # number of agents
p_t = 0.1 # proportion 'trendsetters'
# plotting window
par(mfrow = c(1, 2))
# draw one network per degree-distribution type and plot them side by side
for (i in c("power-law", "log-normal")) {
    # generate degree sequence using a seed
    degseq <- fdegseq(n = n, dist = i, alpha = 2.7, k_min = 3, seed = 123)
    # retrieve <k>
    mean_deg <- mean(degseq)
    # to graph object, based on the Viger-Latapy algorithm
    network <- sample_degseq(degseq, method = "vl")
    # randomly assign roles to nodes, based on proportion trendsetters
    V(network)$role <- sample(c(rep("trendsetter", floor(n * p_t)), rep("conformist", n - floor(n * p_t))))
    # plot the graph
    fplot_graph(network, main = paste(i, "degree distribution\n", "<k>=", round(mean_deg, 2)))
}

Produce random networks with the same average degree (<k>) as scale-free counterparts (by tweaking edge probability p):
# loop over a higher number of simulated configuration models to get an accurate
# estimate of average degree <k> in scale-free networks
nIter <- 1000
alphas <- c(2.4, 2.7, 3)
kmean <- list()
for (alpha in alphas) {
    kmean[[as.character(alpha)]] <- numeric(nIter)
    for (i in 1:nIter) {
        # try(): sample_degseq() can fail for non-graphical sequences; failed
        # iterations leave a 0 in the vector and are filtered out below
        try({
            degseq <- fdegseq(n = 96, alpha = alpha, k_min = 3)
            network <- sample_degseq(degseq, method = "vl")
            kmean[[as.character(alpha)]][i] <- mean(degree(network))
        }, silent = TRUE)
    }
}
# edge probability p = <k>/(n-1) matches an Erdos-Renyi G(n, p) graph to each
# estimated scale-free mean degree (zeros from failed iterations excluded)
plist <- list(mean(kmean$`2.4`[kmean$`2.4` != 0])/(n - 1), mean(kmean$`2.7`[kmean$`2.7` != 0])/(n - 1),
    mean(kmean$`3`[kmean$`3` != 0])/(n - 1))
par(mfrow = c(1, length(plist)), mar = c(1, 1, 3, 1))
for (i in 1:length(plist)) {
    network <- erdos.renyi.game(96, p = plist[[i]])
    V(network)$role <- sample(c(rep("trendsetter", floor(n * p_t)), rep("conformist", n - floor(n * p_t))))
    fplot_graph(network, main = paste("random network\n", "p =", round(plist[[i]], 3)))
}

# small-world (Watts-Strogatz) benchmark: ring lattice (nei = 3) with 10% rewiring
network <- sample_smallworld(dim = 1, size = 96, nei = 3, p = 0.1)
V(network)$role <- sample(c(rep("trendsetter", floor(n * p_t)), rep("conformist", n - floor(n * p_t))))
fplot_graph(network, main = "small-world network")

# plot degree distributions side-by-side (log-log axes to reveal heavy tails)
er <- erdos.renyi.game(n, p = 0.01)
sw <- sample_smallworld(dim = 1, size = n, nei = 3, p = 0.1)
sf <- sample_degseq(fdegseq(n = n, alpha = 2.7, k_min = 3), method = "vl")
ln <- sample_degseq(fdegseq(n = n, alpha = 2.7, dist = "log-normal", k_min = 3), method = "vl")
par(mfrow = c(1, 4))
plot(degree_distribution(er), pch = 20, xlab = "k", ylab = "P(k)", las = 1, main = "Random graph", log = "xy")
plot(degree_distribution(sw), pch = 20, xlab = "k", ylab = "P(k)", las = 1, main = "Small-world graph",
    log = "xy")
plot(degree_distribution(sf), pch = 20, xlab = "k", ylab = "P(k)", las = 1, main = "Scale-free graph",
    log = "xy")
plot(degree_distribution(ln), pch = 20, xlab = "k", ylab = "P(k)", las = 1, main = "Log-normal distribution",
    log = "xy")

Rewiring algorithm to tune degree (dis)assortativity (Newman 2002):
# Degree-preserving rewiring (Newman 2002) to tune degree assortativity r_kk:
# repeatedly propose a double-edge swap (u1,v1),(u2,v2) -> (u1,v2),(u2,v1) —
# which leaves every node's degree unchanged — and accept it only if the new
# assortativity is closer to the target. Loops and duplicate edges are never
# created.
# - network: undirected, simple igraph object
# - target_r: desired assortativity coefficient
# - max_iter: maximum number of proposal iterations
# - tol: acceptable absolute deviation from target_r
# - verbose: print progress messages
# Returns the rewired igraph object.
frewire_r <- function(network, target_r, max_iter = 1e+05, tol = 0.01, verbose = TRUE) {
    current_r <- assortativity_degree(network)
    iteration <- 1
    if (verbose) {
        cat("Target assortativity coefficient:", target_r, "\n")
        cat("Starting assortativity coefficient:", current_r, "\n")
        cat("Tolerance:", tol, "\n")
    }
    while (abs(current_r - target_r) > tol && iteration < max_iter) {
        # get network edges (re-fetched every iteration: the graph may have changed)
        edges <- E(network)
        # to edgelist
        edge_list <- ends(network, edges)
        # randomly select two pairs of connected nodes (may coincide; filtered below)
        idx1 <- sample(1:nrow(edge_list), 1)
        idx2 <- sample(1:nrow(edge_list), 1)
        # extract node indices
        u1 <- edge_list[idx1, 1] # node 1 of first edge
        v1 <- edge_list[idx1, 2] # node 2 of first edge
        u2 <- edge_list[idx2, 1] # etc
        v2 <- edge_list[idx2, 2]
        # check if the two pairs of connected nodes (u1, v1; u2, v2) are disjoint
        if (length(unique(c(u1, v1, u2, v2))) == 4) {
            # check if there is already an edge across the node-pairs; ensure no loops and no
            # duplicate edges
            if (!are_adjacent(network, u1, u2) && !are_adjacent(network, v1, v2) && u1 != v2 && u2 !=
                v1) {
                # perform the edge swap (u1,v1) <-> (u2,v2) becomes (u1,v2) <-> (u2,v1)
                new_network <- network # copy network
                # check if the new edges already exist to avoid duplicates
                if (!are_adjacent(new_network, u1, v2) && !are_adjacent(new_network, u2, v1)) {
                    # add edges
                    new_network <- add_edges(new_network, c(u1, v2, u2, v1))
                    # remove edges
                    new_network <- delete_edges(new_network, get.edge.ids(new_network, c(u1, v1, u2, v2)))
                    # new assortativity
                    new_r <- assortativity_degree(new_network)
                    # accept tie swap only if it brings us closer to the target assortativity
                    if (abs(new_r - target_r) < abs(current_r - target_r)) {
                        current_r <- new_r
                        network <- new_network
                        if (verbose) {
                            cat("Rewiring at iteration", iteration, "brought assortativity closer to target! Current assortativity coefficient:",
                                new_r, "\n")
                        }
                    }
                }
            }
        }
        iteration <- iteration + 1
    }
    # final status report
    if (verbose) {
        cat("Final assortativity coefficient:", current_r, "\n")
        if (abs(current_r - target_r) <= tol) {
            cat("Target reached within tolerance.\n")
        } else {
            cat("Reached maximum iterations without meeting target.\n")
        }
    }
    return(network)
}
Apply the function:
# ---- apply the rewiring function to a fresh scale-free network ----
# initialize scale-free network
network <- sample_degseq(fdegseq(n = n, alpha = 2.7, k_min = 3, seed = 123), method = "vl")
V(network)$role <- sample(c(rep("trendsetter", floor(n * p_t)), rep("conformist", n - floor(n * p_t))))
# get current assortativity coefficient
assortativity_degree(network)
#> [1] -0.1485165
# set new (more disassortative) target and rewire towards it
frewire_r(network, target_r = -0.2, max_iter = 1000)
#> Target assortativity coefficient: -0.2
#> Starting assortativity coefficient: -0.1485165
#> Tolerance: 0.01
#> Rewiring at iteration 4 brought assortativity closer to target! Current assortativity coefficient: -0.1505027
#> Rewiring at iteration 12 brought assortativity closer to target! Current assortativity coefficient: -0.1506682
#> Rewiring at iteration 36 brought assortativity closer to target! Current assortativity coefficient: -0.1509165
#> Rewiring at iteration 45 brought assortativity closer to target! Current assortativity coefficient: -0.151082
#> Rewiring at iteration 48 brought assortativity closer to target! Current assortativity coefficient: -0.1515786
#> Rewiring at iteration 56 brought assortativity closer to target! Current assortativity coefficient: -0.1546131
#> Rewiring at iteration 60 brought assortativity closer to target! Current assortativity coefficient: -0.1557442
#> Rewiring at iteration 66 brought assortativity closer to target! Current assortativity coefficient: -0.1559097
#> Rewiring at iteration 82 brought assortativity closer to target! Current assortativity coefficient: -0.1570408
#> Rewiring at iteration 87 brought assortativity closer to target! Current assortativity coefficient: -0.1570959
#> Rewiring at iteration 95 brought assortativity closer to target! Current assortativity coefficient: -0.1615098
#> Rewiring at iteration 99 brought assortativity closer to target! Current assortativity coefficient: -0.1654823
#> Rewiring at iteration 103 brought assortativity closer to target! Current assortativity coefficient: -0.1665306
#> Rewiring at iteration 114 brought assortativity closer to target! Current assortativity coefficient: -0.1666409
#> Rewiring at iteration 117 brought assortativity closer to target! Current assortativity coefficient: -0.1667513
#> Rewiring at iteration 118 brought assortativity closer to target! Current assortativity coefficient: -0.1668065
#> Rewiring at iteration 127 brought assortativity closer to target! Current assortativity coefficient: -0.166972
#> Rewiring at iteration 140 brought assortativity closer to target! Current assortativity coefficient: -0.167303
#> Rewiring at iteration 147 brought assortativity closer to target! Current assortativity coefficient: -0.1676341
#> Rewiring at iteration 149 brought assortativity closer to target! Current assortativity coefficient: -0.1677444
#> Rewiring at iteration 154 brought assortativity closer to target! Current assortativity coefficient: -0.1677996
#> Rewiring at iteration 162 brought assortativity closer to target! Current assortativity coefficient: -0.1689306
#> Rewiring at iteration 166 brought assortativity closer to target! Current assortativity coefficient: -0.1689858
#> Rewiring at iteration 174 brought assortativity closer to target! Current assortativity coefficient: -0.1695927
#> Rewiring at iteration 179 brought assortativity closer to target! Current assortativity coefficient: -0.1703651
#> Rewiring at iteration 180 brought assortativity closer to target! Current assortativity coefficient: -0.1726824
#> Rewiring at iteration 191 brought assortativity closer to target! Current assortativity coefficient: -0.1733445
#> Rewiring at iteration 204 brought assortativity closer to target! Current assortativity coefficient: -0.1736755
#> Rewiring at iteration 207 brought assortativity closer to target! Current assortativity coefficient: -0.1757997
#> Rewiring at iteration 208 brought assortativity closer to target! Current assortativity coefficient: -0.1758549
#> Rewiring at iteration 210 brought assortativity closer to target! Current assortativity coefficient: -0.1759652
#> Rewiring at iteration 213 brought assortativity closer to target! Current assortativity coefficient: -0.1762963
#> Rewiring at iteration 220 brought assortativity closer to target! Current assortativity coefficient: -0.1764618
#> Rewiring at iteration 242 brought assortativity closer to target! Current assortativity coefficient: -0.1786136
#> Rewiring at iteration 247 brought assortativity closer to target! Current assortativity coefficient: -0.183993
#> Rewiring at iteration 267 brought assortativity closer to target! Current assortativity coefficient: -0.1841585
#> Rewiring at iteration 276 brought assortativity closer to target! Current assortativity coefficient: -0.185262
#> Rewiring at iteration 281 brought assortativity closer to target! Current assortativity coefficient: -0.1854275
#> Rewiring at iteration 291 brought assortativity closer to target! Current assortativity coefficient: -0.1877448
#> Rewiring at iteration 314 brought assortativity closer to target! Current assortativity coefficient: -0.1878551
#> Rewiring at iteration 317 brought assortativity closer to target! Current assortativity coefficient: -0.1880206
#> Rewiring at iteration 323 brought assortativity closer to target! Current assortativity coefficient: -0.191331
#> Final assortativity coefficient: -0.191331
#> Target reached within tolerance.
#> IGRAPH 5bd224a U--- 96 267 -- Degree sequence random graph
#> + attr: name (g/c), method (g/c), role (v/c)
#> + edges from 5bd224a:
#> [1] 1--26 1--24 1--10 2--24 2--87 2--31 2--11 2--53 2-- 3 3--24 4--84 4--47 4--61
#> [14] 4--24 5--24 5--23 5--94 5--38 5--55 5--54 5-- 9 5--58 5--80 5--60 5--11 6--89
#> [27] 6--81 6--59 7--58 7--53 7--61 8--36 8--67 8--24 8--72 8--65 9--59 9--50 10--11
#> [40] 10--14 11--95 11--38 11--73 11--35 11--16 11--22 11--47 11--72 11--62 11--24 11--63 12--24
#> [53] 12--50 13--55 13--24 13--51 14--95 14--32 15--24 15--42 16--93 16--48 16--63 16--73 17--21
#> [66] 17--69 18--32 18--91 19--87 20--67 20--45 20--65 20--77 20--31 20--50 20--37 20--60 20--87
#> [79] 20--61 20--24 20--44 21--28 21--42 21--53 21--33 21--22 22--87 23--27 23--37 24--45 24--33
#> [92] 24--73 24--87 24--54 24--44 24--31 24--74 24--27 24--25 24--89 24--56 24--32 24--83 24--79
#> + ... omitted several edges
Swapping algorithm to tune degree-trait correlation (Lerman, Yan, and Wu 2016):
# we manipulate degree-trait correlation (rho) by swapping attribute values among the nodes. To
# increase \rho_{kx}, we randomly choose nodes v_1 with x=1 and v_0 with x=0 and swap their
# attributes if the degree of v_0 is greater than that of v_1 (until the desired
# \rho_{kx} is reached; or it no longer changes).
# Degree-trait correlation rho_kx: Pearson correlation between node degree and
# the binary role indicator (trendsetter = 1, conformist = 0). Also returns the
# underlying role and degree vectors so callers (e.g. fswap_rho) can reuse them.
fdegtraitcor <- function(network) {
    trait <- as.numeric(V(network)$role == "trendsetter")
    k <- degree(network)
    list(cor = cor(trait, k), roles = trait, degrees = k)
}
# Swapping algorithm to tune the degree-trait correlation rho_kx: repeatedly
# pick a random trendsetter v1 and conformist v0 and swap their roles whenever
# deg(v0) > deg(v1), tracking the network whose correlation came closest to
# the target.
# - network: igraph object with vertex attribute 'role'
# - target_rho: desired degree-trait correlation
# - max_iter: maximum number of swap attempts
# - tol: acceptable absolute deviation from target_rho
# - verbose: print progress messages
# Returns the best (closest-to-target) network observed.
fswap_rho <- function(network, target_rho, max_iter = 1000, tol = 0.05, verbose = TRUE) {
    # safe single-element sampling: sample(x, 1) on a length-1 numeric x
    # samples from 1:x instead of returning x (documented sample() pitfall)
    fpick <- function(x) if (length(x) == 1) x else sample(x, 1)
    current <- fdegtraitcor(network)
    iteration <- 1
    best_network <- network
    best_rho <- current$cor
    if (verbose) {
        cat("Target degree-trait correlation:", target_rho, "\n")
        cat("Starting degree-trait correlation:", current$cor, "\n")
        cat("Tolerance:", tol, "\n\n")
    }
    while (iteration <= max_iter) {
        # check if we are already within tolerance
        if (abs(current$cor - target_rho) <= tol) {
            if (verbose)
                cat("Target reached within tolerance at iteration", iteration, ".\n")
            break
        }
        cand1 <- which(current$roles == 1)
        cand0 <- which(current$roles == 0)
        # no swap is possible if either role class is empty
        if (length(cand1) == 0 || length(cand0) == 0)
            break
        # randomly select one trendsetter and one conformist for swapping
        v1 <- fpick(cand1)
        v0 <- fpick(cand0)
        # get degrees of selected nodes
        k1 <- current$degrees[v1]
        k0 <- current$degrees[v0]
        # swap roles if condition k_v0 > k_v1 is met
        if (k0 > k1) {
            current$roles[v1] <- 0
            current$roles[v0] <- 1
            # update graph roles
            V(network)$role <- ifelse(current$roles == 1, "trendsetter", "conformist")
            # recalculate degree-trait correlation
            current <- fdegtraitcor(network)
            # check if this is the closest correlation to the target so far
            if (abs(current$cor - target_rho) < abs(best_rho - target_rho)) {
                best_network <- network
                best_rho <- current$cor
                if (verbose) {
                    cat("Trait-swapping at iteration", iteration, "brought correlation closer to target! Current correlation:",
                        current$cor, "\n")
                }
            }
        }
        iteration <- iteration + 1
    }
    # check if the final correlation is worse than the best correlation
    final_correlation <- current$cor
    if (abs(final_correlation - target_rho) > abs(best_rho - target_rho)) {
        if (verbose) {
            cat("\nWarning: Final iteration made the correlation worse. Reverting to best observed correlation.\n")
        }
    }
    if (verbose) {
        cat("\nFinal degree-trait correlation:", best_rho, "\n")
        if (abs(best_rho - target_rho) <= tol) {
            cat("Target reached within tolerance.\n")
        } else if (iteration > max_iter) {
            cat("Reached maximum iterations without meeting target.\n")
        }
    }
    return(best_network)
}
Apply the function:
# get current degree-trait correlation (roles were assigned at random above)
fdegtraitcor(network)
#> $cor
#> [1] -0.1127529
#>
#> $roles
#> [1] 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 1 1 0 0 0
#> [49] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
#>
#> $degrees
#> [1] 3 6 3 9 13 3 4 9 4 4 16 4 5 4 3 10 3 3 3 15 9 5 5 44 5 5 4 4 3 3 17 10
#> [33] 5 6 3 4 6 3 3 3 3 3 3 3 3 3 3 4 3 8 3 4 6 3 4 3 3 6 9 3 5 3 3 3
#> [65] 7 4 7 7 6 4 6 5 5 3 4 3 3 4 3 3 3 5 3 6 3 4 28 9 9 3 3 5 3 5 3 3
# set new target and report the achieved correlation of the swapped network
target = 0.5
fdegtraitcor(fswap_rho(network = network, target_rho = target, tol = 0.01))
#> Target degree-trait correlation: 0.5
#> Starting degree-trait correlation: -0.1127529
#> Tolerance: 0.01
#>
#> Trait-swapping at iteration 1 brought correlation closer to target! Current correlation: -0.06649532
#> Trait-swapping at iteration 4 brought correlation closer to target! Current correlation: -0.04667063
#> Trait-swapping at iteration 5 brought correlation closer to target! Current correlation: -0.0400624
#> Trait-swapping at iteration 8 brought correlation closer to target! Current correlation: -0.03345417
#> Trait-swapping at iteration 12 brought correlation closer to target! Current correlation: -0.02684594
#> Trait-swapping at iteration 14 brought correlation closer to target! Current correlation: -0.02023771
#> Trait-swapping at iteration 18 brought correlation closer to target! Current correlation: -0.007021245
#> Trait-swapping at iteration 19 brought correlation closer to target! Current correlation: 0.03923637
#> Trait-swapping at iteration 20 brought correlation closer to target! Current correlation: 0.07227752
#> Trait-swapping at iteration 22 brought correlation closer to target! Current correlation: 0.08549398
#> Trait-swapping at iteration 36 brought correlation closer to target! Current correlation: 0.09210221
#> Trait-swapping at iteration 41 brought correlation closer to target! Current correlation: 0.1846174
#> Trait-swapping at iteration 88 brought correlation closer to target! Current correlation: 0.257308
#> Trait-swapping at iteration 102 brought correlation closer to target! Current correlation: 0.3894726
#> Trait-swapping at iteration 117 brought correlation closer to target! Current correlation: 0.3960808
#> Trait-swapping at iteration 144 brought correlation closer to target! Current correlation: 0.4092973
#> Trait-swapping at iteration 244 brought correlation closer to target! Current correlation: 0.429122
#> Trait-swapping at iteration 260 brought correlation closer to target! Current correlation: 0.4423384
#> Trait-swapping at iteration 287 brought correlation closer to target! Current correlation: 0.4687714
#>
#> Warning: Final iteration made the correlation worse. Reverting to best observed correlation.
#>
#> Final degree-trait correlation: 0.4687714
#> Reached maximum iterations without meeting target.
#> $cor
#> [1] 0.4687714
#>
#> $roles
#> [1] 0 0 0 1 1 0 0 1 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
#> [49] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0
#>
#> $degrees
#> [1] 3 6 3 9 13 3 4 9 4 4 16 4 5 4 3 10 3 3 3 15 9 5 5 44 5 5 4 4 3 3 17 10
#> [33] 5 6 3 4 6 3 3 3 3 3 3 3 3 3 3 4 3 8 3 4 6 3 4 3 3 6 9 3 5 3 3 3
#> [65] 7 4 7 7 6 4 6 5 5 3 4 3 3 4 3 3 3 5 3 6 3 4 28 9 9 3 3 5 3 5 3 3
Simulate networks (here, scale-free with alpha = 2.7) with independently varying degree assortativity and degree-trait correlation:
# ---- simulate networks with independently varied r_kk and rho_kx, all from
# one scale-free degree sequence; 3x3 grid: graphs, knn plots, violin plots ----
par(mfrow = c(3, 3))
# NETWORKS
alpha = 2.7
degseq <- fdegseq(n = 96, alpha = alpha, k_min = 3, seed = 2352)
network <- sample_degseq(degseq, method = "vl")
V(network)$role <- sample(c(rep("trendsetter", floor(n * p_t)), rep("conformist", n - floor(n * p_t))))
# variant 1: mild disassortativity
network1 <- frewire_r(network, target_r = -0.1, verbose = FALSE)
fplot_graph(network1, main = paste0("configuration model\ndegree sequence generated from p(k)~k^{-α}\nN=96; prop. trendsetter = 0.1; α=2.7; k_min=3\nr_kk = ",
    round(assortativity_degree(network1), 3), "; p_kx = ", round(fdegtraitcor(network1)$cor, 3)))
# variant 2: stronger disassortativity
network2 <- frewire_r(network, target_r = -0.3, verbose = FALSE)
fplot_graph(network2, main = paste0("configuration model\ndegree sequence generated from p(k)~k^{-α}\nN=96; prop. trendsetter = 0.1; α=2.7; k_min=3\nr_kk = ",
    round(assortativity_degree(network2), 3), "; p_kx = ", round(fdegtraitcor(network2)$cor, 3)))
# variant 3: variant 2 plus a positive degree-trait correlation
network3 <- fswap_rho(network2, target_rho = 0.5, verbose = FALSE)
fplot_graph(network3, main = paste0("configuration model\ndegree sequence generated from p(k)~k^{-α}\nN=96; prop. trendsetter = 0.1; α=2.7; k_min=3\nr_kk = ",
    round(assortativity_degree(network3), 3), "; p_kx = ", round(fdegtraitcor(network3)$cor, 3)))
# DEGREE ASSORTATIVITY: average neighbor degree <k'> as a function of degree k
plot(degree(network), knn(network)$knn, pch = 19, xlab = "Node degree (k)", ylab = "Average neighbor degree (<k'>)",
    main = "degree assortativity")
plot(degree(network2), knn(network2)$knn, pch = 19, xlab = "Node degree (k)", ylab = "Average neighbor degree (<k'>)",
    main = "degree assortativity")
plot(degree(network3), knn(network3)$knn, pch = 19, xlab = "Node degree (k)", ylab = "Average neighbor degree (<k'>)",
    main = "degree assortativity")
# VIOLIN PLOT OF DEGREE DISTRIBUTION PER ROLE
# extract node degrees
degrees <- degree(network1)
trendsetter_degrees <- degrees[V(network1)$role == "trendsetter"]
conformist_degrees <- degrees[V(network1)$role == "conformist"]
vioplot(trendsetter_degrees, conformist_degrees, names = c("Trendsetters", "Conformists"), col = c("#FFD700",
    "#800080"), main = "degree distribution")
degrees2 <- degree(network2)
trendsetter_degrees2 <- degrees2[V(network2)$role == "trendsetter"]
conformist_degrees2 <- degrees2[V(network2)$role == "conformist"]
vioplot(trendsetter_degrees2, conformist_degrees2, names = c("Trendsetters", "Conformists"), col = c("#FFD700",
    "#800080"), main = "degree distribution")
degrees3 <- degree(network3)
trendsetter_degrees3 <- degrees3[V(network3)$role == "trendsetter"]
conformist_degrees3 <- degrees3[V(network3)$role == "conformist"]
vioplot(trendsetter_degrees3, conformist_degrees3, names = c("Trendsetters", "Conformists"), col = c("#FFD700",
    "#800080"), main = "degree distribution")

Specify a parameter space to generate networks with independently varied degree distribution, degree assortativity, degree-trait correlation:
# ---- network universe: degree distribution x assortativity x degree-trait
# correlation (one network per parameter combination) ----
# structural parameters
n = 96
k_min = 3
p_t = 0.1
# target parameters
alphas <- c(2.4, 2.7, 3)
distributions <- c("power-law", "log-normal")
# feasible assortativity ranges differ per alpha
target_r_values <- list(seq(-0.4, -0.15, by = 0.05), seq(-0.35, -0.05, by = 0.05), seq(-0.3, 0.1, by = 0.05))
names(target_r_values) <- alphas
target_rho_values <- seq(0, 0.6, by = 0.1)
# list for results
results <- list()
for (dist in distributions) {
    for (alpha in alphas) {
        # generate degree sequence from specified distribution and alpha
        degseq <- fdegseq(n = n, dist = dist, alpha = alpha, k_min = k_min, seed = 123)
        # create an undirected, connected, simple graph using Viger-Latapy algorithm
        network <- sample_degseq(degseq, method = "vl")
        # assign roles randomly, based on proportion trendsetter p_t
        V(network)$role <- sample(c(rep("trendsetter", floor(n * p_t)), rep("conformist", n - floor(n *
            p_t))))
        for (target_r in target_r_values[[as.character(alpha)]]) {
            # loop over target_r values
            rewired_network <- frewire_r(network, target_r, verbose = FALSE)
            actual_r <- assortativity_degree(rewired_network)
            for (target_rho in target_rho_values) {
                # loop over target_rho values
                final_network <- fswap_rho(rewired_network, target_rho, verbose = FALSE)
                final_rho <- fdegtraitcor(final_network)$cor
                # assume that agents act according to their role; agents don't observe each
                # others' roles, but do observe actions (based on which they can infer
                # roles/preferences...)
                V(final_network)$action <- ifelse(V(final_network)$role == "trendsetter", 1, 0)
                # store results (note: element name is 'distribution', not 'dist')
                results <- append(results, list(list(distribution = dist, alpha = alpha, target_r = target_r,
                    actual_r = actual_r, target_rho = target_rho, actual_rho = final_rho, network = final_network)))
            }
        }
    }
}
Sample some simulations from the ‘network universe’:
# show 5 randomly sampled parameter-space rows, excluding the graph object:
lapply(sample(results, 5), function(x) {
    z <- x # copy
    z$network <- NULL # drop the igraph object to keep the printout readable
    z
})
#> [[1]]
#> [[1]]$distribution
#> [1] "log-normal"
#>
#> [[1]]$alpha
#> [1] 2.7
#>
#> [[1]]$target_r
#> [1] -0.25
#>
#> [[1]]$actual_r
#> [1] -0.2516837
#>
#> [[1]]$target_rho
#> [1] 0.5
#>
#> [[1]]$actual_rho
#> [1] 0.4841843
#>
#>
#> [[2]]
#> [[2]]$distribution
#> [1] "log-normal"
#>
#> [[2]]$alpha
#> [1] 2.7
#>
#> [[2]]$target_r
#> [1] -0.3
#>
#> [[2]]$actual_r
#> [1] -0.2920339
#>
#> [[2]]$target_rho
#> [1] 0.4
#>
#> [[2]]$actual_rho
#> [1] 0.3513395
#>
#>
#> [[3]]
#> [[3]]$distribution
#> [1] "power-law"
#>
#> [[3]]$alpha
#> [1] 3
#>
#> [[3]]$target_r
#> [1] -0.25
#>
#> [[3]]$actual_r
#> [1] -0.240705
#>
#> [[3]]$target_rho
#> [1] 0.2
#>
#> [[3]]$actual_rho
#> [1] 0.14159
#>
#>
#> [[4]]
#> [[4]]$distribution
#> [1] "power-law"
#>
#> [[4]]$alpha
#> [1] 3
#>
#> [[4]]$target_r
#> [1] -0.1
#>
#> [[4]]$actual_r
#> [1] -0.09125596
#>
#> [[4]]$target_rho
#> [1] 0.5
#>
#> [[4]]$actual_rho
#> [1] 0.4929644
#>
#>
#> [[5]]
#> [[5]]$distribution
#> [1] "log-normal"
#>
#> [[5]]$alpha
#> [1] 2.4
#>
#> [[5]]$target_r
#> [1] -0.2
#>
#> [[5]]$actual_r
#> [1] -0.1954962
#>
#> [[5]]$target_rho
#> [1] 0.2
#>
#> [[5]]$actual_rho
#> [1] 0.1673623
A function to calculate the magnitude of the “majority illusion”, based on the network structure, and a given majority threshold (here, “weak influence”, so more than half of neighbors need to adopt the trend in order for an ego to be persuaded to do the same):
# Magnitude of the "majority illusion": the fraction of conformist nodes for
# whom the proportion of trend-adopting neighbors exceeds 'threshold'.
# Default threshold 0.5 implements "weak influence" (more than half of the
# neighborhood must have adopted); under 'strong influence' exactly half
# suffices, so set threshold to .49 for that variant.
# - network: igraph object with vertex attributes 'role' and 'action'
# - threshold: proportion of adopting neighbors that must be exceeded
calculate_majority_illusion <- function(network, threshold = 0.5) {
    roles <- V(network)$role
    actions <- V(network)$action
    # initialize counter for majority illusion
    mi_count <- 0
    # loop over conformists (trendsetters adopt regardless of their neighbors)
    for (v in V(network)) {
        if (roles[v] == "conformist") {
            nbrs <- neighbors(network, v)  # renamed: don't shadow igraph::neighbors
            # skip isolated nodes: 0/0 would be NaN and crash the if() below
            if (length(nbrs) == 0)
                next
            trend_neighbors <- sum(actions[nbrs] == 1)
            prop_trend <- trend_neighbors/length(nbrs)
            if (prop_trend > threshold) {
                mi_count <- mi_count + 1
            }
        }
    }
    # return fraction of conformists who have the majority illusion
    return(mi_count/sum(roles == "conformist"))
}
Apply the function over our ‘network universe’, where each network corresponds to a specific parameter space row, generated using a unique seed:
# collapse the simulated networks into one plotting data-frame (one row each)
plotdata <- do.call(rbind, lapply(results, function(res) {
    # NOTE(review): res$dist relies on $ partial matching — the list element
    # is actually named 'distribution'; consider res$distribution for safety
    dist <- factor(res$dist, levels = c("power-law", "log-normal"))
    alpha <- res$alpha
    r <- res$actual_r
    rho <- res$actual_rho
    network <- res$network
    # calculate the majority illusion (i.e., the proportion of conformists whose neighbors meet or
    # exceed threshold φ)
    mi <- calculate_majority_illusion(network)
    # create a data-frame
    data.frame(dist = dist, alpha = alpha, r = r, rho = rho, mi = mi)
}))
# make separate data-frames for each level of alpha (order follows first
# appearance in plotdata, i.e. the order of the 'alphas' vector)
alpha1 <- plotdata[plotdata$alpha == unique(plotdata$alpha)[1], ]
alpha2 <- plotdata[plotdata$alpha == unique(plotdata$alpha)[2], ]
alpha3 <- plotdata[plotdata$alpha == unique(plotdata$alpha)[3], ]
# create bins for r (deg-assorativity)
# Bin a numeric column into quantile-based categories (out - 1 bins) and
# attach the result as a labelled factor column 'r_cats'.
# - data: data.frame containing 'variable'
# - variable: name of the column to bin (default "r", degree assortativity)
# - out: number of quantile break points (yields out - 1 bins)
# Returns 'data' with the added 'r_cats' factor column.
fcreate_bins <- function(data, variable = "r", out = 6) {
    vals <- data[[variable]]
    breaks <- quantile(vals, probs = seq(0, 1, length.out = out))
    # build one label per bin from consecutive break points
    n_bins <- length(breaks) - 1
    bin_labels <- vapply(seq_len(n_bins), function(j) {
        paste0(round(breaks[j], 2), " < r ≤ ", round(breaks[j + 1], 2))
    }, character(1))
    # attach the binned categories to the dataset
    data$r_cats <- cut(vals, breaks = breaks, include.lowest = TRUE, labels = bin_labels)
    data
}
# apply binning to each subset (adds the 'r_cats' factor column in place)
alpha1 <- fcreate_bins(alpha1)
alpha2 <- fcreate_bins(alpha2)
alpha3 <- fcreate_bins(alpha3)
# Build one MI-vs-rho scatter+loess panel per alpha level. The three panels
# share identical aesthetics, so construct them with a single helper.
# - d: one alpha-subset of plotdata (columns rho, mi, r_cats, dist, alpha)
fplot_mi_panel <- function(d) {
    ggplot(d, aes(x = rho, y = mi, color = as.factor(r_cats))) +
        facet_wrap(~dist, nrow = 2) +
        geom_point(alpha = 0.7, size = 2) +
        geom_smooth(se = FALSE, method = "loess") +
        scale_y_continuous(limits = c(-0.05, 0.7)) +
        labs(x = expression("degree-trait correlation (" ~ rho[kx] ~ ")"),
             y = "prop. conformists w/ prop. trendsetter nbh. > φ",
             color = expression("degree assortativity (" ~ r[kk] ~ ")")) +
        theme(legend.position = c(0.3, 0.35)) +
        # unique(): d$alpha is a full column; a vector title would be wrong
        ggtitle(paste0("α=", unique(d$alpha)))
}
plot1 <- fplot_mi_panel(alpha1)
plot2 <- fplot_mi_panel(alpha2)
plot3 <- fplot_mi_panel(alpha3)
# combine the three panels into one figure with a shared, centered title
ggarrange(plot1, plot2, plot3, ncol = 3) + ggtitle(" \"Majority illusion\" in networks with a power-law and log-normal degree distribution") +
    theme(plot.title = element_text(hjust = 0.5, face = "bold", size = 16))

Now for each parameter space row generate networks over N different seeds:
# ---- replicate the network universe over many seeds, in parallel ----
# set up parallel backend to increase efficiency
ncores <- detectCores() - 1
cl <- makeCluster(ncores)
registerDoParallel(cl)
# number of seeds
nIter <- 100
distributions <- c("power-law", "log-normal")
# list to store results in
rep_results <- list()
# parallel processing using foreach; %:% nests the alpha and seed loops and
# .combine='c' concatenates all per-worker result lists into one flat list
rep_results <- foreach(alpha = alphas, .combine = 'c', .packages = c("igraph")) %:%
    foreach(iter = 1:nIter, .combine = 'c', .packages = c("igraph")) %dopar% {
        # seed must vary with each iteration
        seed <- 123 + iter
        set.seed(seed)
        # NOTE(review): print() inside %dopar% is not shown on the master console
        print(paste0("Running iteration ", iter, "/", nIter, " for alpha = ", alpha))
        results <- list() #temporary storage for the results
        # loop over distribution types:
        for (dist in distributions) {
            # generate degree sequence with specified distribution type
            degseq <- fdegseq(n = n, alpha = alpha, k_min = k_min, seed = 123 + iter, dist = dist)
            # construct the network
            network <- sample_degseq(degseq, method = "vl")
            # assign roles randomly
            V(network)$role <- sample(c(rep("trendsetter", floor(n * p_t)), rep("conformist", n - floor(n * p_t))))
            # loop over target_r values
            for (target_r in target_r_values[[as.character(alpha)]]) {
                rewired_network <- frewire_r(network, target_r, verbose = FALSE, max_iter = 1e4)
                actual_r <- assortativity_degree(rewired_network)
                # loop over target_rho values
                for (target_rho in target_rho_values) {
                    final_network <- fswap_rho(rewired_network, target_rho, verbose = FALSE)
                    final_rho <- fdegtraitcor(final_network)$cor
                    #update choices (choice==role):
                    V(final_network)$action <- ifelse(V(final_network)$role == "trendsetter", 1, 0)
                    #and calculate global majority illusion
                    mi <- calculate_majority_illusion(final_network)
                    # append results (here the element is named 'dist')
                    results <- append(results, list(list(
                        dist = dist,
                        alpha = alpha,
                        target_r = target_r,
                        actual_r = actual_r,
                        target_rho = target_rho,
                        actual_rho = final_rho,
                        mi = mi,
                        seed = seed, #store seed for reproducibility
                        network = final_network # store networks, so we can access these for the simulations.
                    )))
                }
            }
        }
        results
    }
stopCluster(cl)
#save output with fsave(), which prefixes the current date stamp
fsave(rep_results, "networks.Rda")
Visualize the extent of majority illusion depending on the network parameters across two degree distribution types, based on network simulations with different seeds. Visualizations include both observed MI and predicted values from OLS regression.
# Reload the saved networks (file name was date-stamped by fsave) and flatten
# the per-run result lists into one data frame, one row per generated network.
nets <- fload("./data/processed/20250107networks.Rda")
rows <- lapply(nets, function(res) {
  data.frame(seed = res$seed,
             dist = res$dist,
             alpha = res$alpha,
             target_r = res$target_r,
             actual_r = res$actual_r,
             target_rho = res$target_rho,
             actual_rho = res$actual_rho,
             mi = res$mi)
})
df <- do.call(rbind, rows)
# create a 3D scatterplot of observed values of MI, disaggregated by
# degree distribution type (one trace per distribution)
fig1 <- plot_ly()
for (dist in unique(df$dist)) {
fig1 <- fig1 %>%
add_trace(
data = df[df$dist == dist, ],
x = ~alpha,
y = ~actual_rho,
z = ~actual_r,
color = ~mi, # marker colour encodes the magnitude of the majority illusion
type = 'scatter3d',
mode = 'markers',
marker = list(size = 3),
name = as.character(dist), # legend entry per distribution type
# NOTE(review): combining a continuous `color` mapping with per-trace
# names can make plotly render one colorbar per trace — verify the
# rendered legend/colorbar.
hovertemplate = paste(
"α: %{x}<br>",
"ρ_{xk}: %{y}<br>",
"r_{kk}: %{z}<br>",
"MI: %{marker.color}<extra></extra>"
)
)
}
# add layout details: overall title and axis labels
fig1 <- fig1 %>%
layout(
title = '3D scatterplot of the magnitude of the "majority illusion" by degree distribution type',
scene = list(
xaxis = list(title = "α"),
yaxis = list(title = "ρ_{xk}"),
zaxis = list(title = "r_{kk}")
)
) %>%
colorbar(title="Majority illusion")
# Fit nested OLS models of MI on the realised network parameters
# (power-law networks only), then build prediction grids over the
# observed ranges of rho and r.
pl_df <- df[df$dist == "power-law", ]
# main effects only
m1 <- lm(mi ~ actual_rho + actual_r + as.factor(alpha), data = pl_df)
# + rho x r interaction
m2 <- lm(mi ~ actual_rho + actual_r + as.factor(alpha) + actual_rho:actual_r, data = pl_df)
# + interactions of rho and r with alpha
m3 <- lm(mi ~ actual_rho + actual_r + as.factor(alpha) + actual_rho:actual_r + actual_rho:as.factor(alpha) + actual_r:as.factor(alpha), data = pl_df)
# model comparison / inspection (uncomment to run):
#anova(m1,m2,m3)
#summary(m3)
# 50-point grids spanning the observed ranges of rho and r
rho_range <- range(pl_df$actual_rho)
r_range <- range(pl_df$actual_r)
rho_vals <- seq(rho_range[1], rho_range[2], length.out = 50)
r_vals <- seq(r_range[1], r_range[2], length.out = 50)
# alpha values at which prediction surfaces are computed
#alpha_vals <- seq(2.4, 3, by=0.1)
alpha_vals <- c(2.4, 2.7, 3)
# Generate the predicted-MI surface for a fixed alpha value.
#
# Returns a matrix of predictions from model m3 over the (r, rho) grid,
# oriented for plot_ly's surface convention: rows correspond to y
# (rho_vals) and columns to x (r_vals), i.e. M[i, j] is the prediction
# at (r = r_vals[j], rho = rho_vals[i]).
fsurfacedat <- function(alpha_val) {
  # grid of r and rho values; expand.grid varies actual_r fastest
  grid <- expand.grid(actual_r = r_vals, actual_rho = rho_vals)
  # hold alpha fixed (m3 treats alpha as a factor, so alpha_val must
  # match one of the fitted levels)
  grid$alpha <- alpha_val
  # predicted MI from the fitted OLS model
  grid$mi_pred <- predict(m3, newdata = grid)
  # reshape into a rho-by-r matrix. Because actual_r varies fastest in
  # the grid, filling by row with ncol = length(r_vals) puts one rho
  # level per row. (The previous nrow = length(r_vals) /
  # ncol = length(rho_vals) only worked because both grids had length 50;
  # this version is correct for unequal grid sizes too.)
  matrix(grid$mi_pred, nrow = length(rho_vals), ncol = length(r_vals), byrow = TRUE)
}
# precompute the surface data for all alpha values (list of z matrices)
surface_data_list <- lapply(alpha_vals, fsurfacedat)
# find the global range of MI (z values) so the z-axis and colour scale
# stay fixed while the slider switches between alpha surfaces
z_min <- min(sapply(surface_data_list, min))
z_max <- max(sapply(surface_data_list, max))
# create the initial surface plot with the first alpha value
fig2 <- plot_ly(
x = r_vals, # x-axis: r
y = rho_vals, # y-axis: rho
z = surface_data_list[[1]], # surface data for the first alpha
type = "surface",
colorbar = list(
title = "MI",
cmin = z_min,
cmax = z_max
),
cmin = z_min, # fix the colour scale across slider steps
cmax = z_max,
hovertemplate = paste(
"r_{kk}: %{x:.2f}<br>",
"ρ_{xk}: %{y:.2f}<br>",
"MI: %{z:.2f}<extra></extra>"
)
)
# add slider for dynamic alpha selection
fig2 <- fig2 %>%
layout(
title = paste('Predicted "majority illusion" in scale-free network from OLS model for α =', alpha_vals[1]),
scene = list(
xaxis = list(title = "r_{kk}"),
yaxis = list(title = "ρ_{kx}"),
zaxis = list(
title = "MI",
range = c(z_min, z_max) # keep z-axis constant across alpha surfaces
)
),
sliders = list(
list(
active = 0, # start with the first alpha value
# one slider step per alpha: swap in that alpha's z matrix and retitle
steps = lapply(seq_along(alpha_vals), function(i) {
list(
method = "update",
args = list(
list(
z = list(surface_data_list[[i]]) #update surface data
),
list(
title = paste('Predicted "majority illusion" in scale-free network from OLS model for α =', alpha_vals[i]) # update title
)
),
label = as.character(alpha_vals[i]) # slider label
)
}),
currentvalue = list(
prefix = "α: ", # text displayed next to the slider
font = list(size = 16)
)
)
)
) %>%
colorbar(title="Majority illusion")
# render the interactive figures
fig1
fig2
Now that we have our “starting networks”, let’s simulate the evolution of an unpopular norm using our utility function:
# Agent-based model of the evolution of an unpopular norm on a fixed network.
# In round 1 agents act on their private preferences; in later rounds each
# agent picks the choice with the higher utility given neighbors' choices
# from the previous round (or samples probabilistically).
#
# Returns list(decision_history, evo) and, if plot = TRUE, also the ggplot.
fabm <- function(network = network, # the generated network; V(network)$role must be set
                 rounds = 10, # number of timesteps/rounds
                 choice_rule = "deterministic", # choice update rule
                 utility_fn = futility, # the utility function
                 params = list(s = 10, e = 10, w = 30, z = 35), # utility parameters
                 mi_threshold = ifelse(params$z > 50, .49, .50), # under the "strong influence" condition (i.e., 50<z<90) half of neighbors adopting the trend is sufficient; under the "weak influence" condition (i.e., 30<z<50) *more* than half of neighbors adopting the trend is needed to be influenced.
                 plot = FALSE) { # if TRUE, also return the plot
  # agents dataframe: one row per vertex; `choice` is filled each round
  agents <- tibble(
    id = seq_len(length(network)), # for igraph objects, length() is the vertex count
    role = V(network)$role,
    preference = ifelse(role == "trendsetter", 1, 0), # 1 = follow trend, 0 = not follow
    choice = NA
  )
  # preallocate per-round snapshots (bound into one tibble after the loop;
  # avoids growing a tibble with bind_rows() inside the loop)
  history <- vector("list", rounds)
  # simulation loop
  for (t in 1:rounds) {
    if (t == 1) {
      # round 1: agents make decisions based on private preferences
      # (no social information available yet)
      agents <- agents %>%
        mutate(choice = preference)
    } else {
      # round t > 1: decisions based on neighbors' decisions at t-1.
      # NB: inside mutate(), `agents` still refers to the pre-update tibble,
      # so utilities are computed from the previous round's choices.
      agents <- agents %>%
        rowwise() %>%
        mutate(
          util_1 = utility_fn(id, 1, agents, network, list(s = params$s, e = params$e, w = params$w, z = params$z))$utility,
          util_0 = utility_fn(id, 0, agents, network, list(s = params$s, e = params$e, w = params$w, z = params$z))$utility,
          # make decision based on expected utility
          choice = ifelse(
            choice_rule == "deterministic",
            ifelse(util_1 > util_0, 1, 0), # deterministic rule
            sample(c(1, 0), size = 1, prob = c(exp(util_1), exp(util_0))) # probabilistic (logit) rule; sample() normalises the weights (@RF: only for conformists? level of noise?)
          )
        )
    }
    # store this round's decisions
    history[[t]] <- agents %>%
      mutate(round = t)
  }
  decision_history <- bind_rows(history)
  # based on decision_history:
  # 1. calculate global majority illusion over rounds
  globMI <- numeric(rounds)
  for (t in 1:rounds) {
    if (t == 1) {
      # in round 1: no social information, so no MI
      globMI[t] <- NA
    } else {
      # rounds t > 1: agents don't observe others' roles, only their
      # previous-round choices, so expose those as `action` on a copy of
      # the network before computing the illusion
      exposure_network <- network
      V(exposure_network)$action <- decision_history[decision_history$round == t - 1, ]$choice
      globMI[t] <- calculate_majority_illusion(exposure_network, threshold = mi_threshold)
    }
  }
  # to long data frame:
  MI <- data.frame(
    round = 1:rounds,
    outcome = "majority_illusion",
    statistic = globMI
  )
  # 2. calculate the evolution of the unpopular norm (share following the trend)
  UN <- decision_history %>%
    group_by(round) %>%
    summarise(
      follow_trend = mean(choice == 1, na.rm = TRUE)
    ) %>%
    pivot_longer(cols = c("follow_trend"),
                 names_to = "outcome",
                 values_to = "statistic")
  # bind both outcome series into one plotting frame
  plotdata <- rbind(MI, UN)
  fig <- ggplot(plotdata, aes(x = round, y = statistic, color = factor(outcome))) +
    geom_line() +
    geom_point() +
    scale_y_continuous(labels = scales::percent_format(scale = 100), limits = c(0, 1)) +
    scale_x_continuous(breaks = seq(1, max(plotdata$round), by = 1)) +
    labs(
      title = "Evolution of an unpopular norm",
      subtitle = "`follow_trend` denotes the percentage of all agents that follow the trend.\n`majority_illusion` reflects the percentage of conformists whose neighbors meet or exceed the adoption threshold φ (i.e., 0.50),\nwith 'strong influence' referring to both meeting and exceeding the threshold, and 'weak influence' to exceeding it only.\nThe grey dashed line reflects the percentage of agents whose role is trendsetter.",
      x = "round",
      y = "% agents",
      color = "outcome") +
    theme(panel.grid.minor.x = element_blank(),
          #legend.position = "bottom",
          plot.subtitle = element_text(size = 8)) +
    # index the role share by name (not position [2]) so the reference line
    # does not depend on the alphabetical ordering of the role labels
    geom_hline(yintercept = prop.table(table(agents$role))["trendsetter"], linetype = "dashed", color = "darkgrey", size = 1)
  if (plot == TRUE) {
    return(list(decision_history = decision_history,
                evo = plotdata,
                plot = fig))
  } else {
    return(list(decision_history = decision_history,
                evo = plotdata))
  }
}
Select 10 random networks from among those in the top 10% of majority illusion and simulate the evolution of the trend for each:
set.seed(235435) # set seed for reproducibility
mis <- sapply(nets, function(x) x$mi) # extract MI values
sorted <- order(mis) # indices sorted by MI (ascending)
# NOTE(review): this reassigns `n` — previously the network size used during
# generation — to the number of stored networks; rename if the old `n` is
# still needed downstream.
n <- length(mis)
top10 <- sorted[ceiling(0.9 * n):n] # top 10% MI
ind <- sample(top10, 10) # select 10 networks at random from the top decile
# loop through each selected network; inspect the evolution of MI and UP, for both weak and strong
# influence scenarios
for (i in ind) {
# access network and statistics
network <- nets[[i]]$network
dist <- nets[[i]]$dist
alpha <- nets[[i]]$alpha
actual_r <- nets[[i]]$actual_r
actual_rho <- nets[[i]]$actual_rho
mi <- nets[[i]]$mi
# print statistics (alpha only applies to power-law networks)
cat("Selected network", i, "\n")
cat("Distribution:", dist, "\n")
if (dist == "power-law") {
cat("α:", alpha, "\n")
}
cat("r_{kk}:", actual_r, "\n")
cat("ρ_{kx}:", actual_rho, "\n")
cat("Starting MI:", mi, "\n")
# weak influence (z = 40, i.e. 30<z<50)
cat("Running simulation for \"weak influence\" scenario...\n")
print(fabm(network = network, rounds = 20, params = list(s = 10, e = 10, w = 30, z = 40), plot = TRUE)$plot)
# strong influence (z = 60, i.e. 50<z<90)
cat("Running simulation for \"strong influence\" scenario...\n")
print(fabm(network = network, rounds = 20, params = list(s = 10, e = 10, w = 30, z = 60), plot = TRUE)$plot)
}
#> Selected network 15753
#> Distribution: power-law
#> α: 2.7
#> r_{kk}: -0.2478605
#> ρ_{kx}: 0.1802289
#> Starting MI: 0.1609195
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

#> Selected network 4900
#> Distribution: power-law
#> α: 2.4
#> r_{kk}: -0.2403668
#> ρ_{kx}: 0.5679784
#> Starting MI: 0.2068966
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

#> Selected network 17338
#> Distribution: power-law
#> α: 2.7
#> r_{kk}: -0.361668
#> ρ_{kx}: 0.4864376
#> Starting MI: 0.2183908
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

#> Selected network 2877
#> Distribution: power-law
#> α: 2.4
#> r_{kk}: -0.3051902
#> ρ_{kx}: 0.5580024
#> Starting MI: 0.1724138
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

#> Selected network 13566
#> Distribution: log-normal
#> r_{kk}: -0.2407299
#> ρ_{kx}: 0.5754819
#> Starting MI: 0.183908
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

#> Selected network 6265
#> Distribution: log-normal
#> r_{kk}: -0.3903671
#> ρ_{kx}: 0.556112
#> Starting MI: 0.1954023
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

#> Selected network 139
#> Distribution: log-normal
#> r_{kk}: -0.341074
#> ρ_{kx}: 0.5159114
#> Starting MI: 0.1724138
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

#> Selected network 10364
#> Distribution: power-law
#> α: 2.7
#> r_{kk}: -0.3400668
#> ρ_{kx}: 0.424732
#> Starting MI: 0.2758621
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

#> Selected network 7749
#> Distribution: power-law
#> α: 2.4
#> r_{kk}: -0.4913325
#> ρ_{kx}: 0.6382822
#> Starting MI: 0.5402299
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

#> Selected network 385
#> Distribution: log-normal
#> r_{kk}: -0.3900845
#> ρ_{kx}: 0.5710399
#> Starting MI: 0.5517241
#> Running simulation for "weak influence" scenario...

#> Running simulation for "strong influence" scenario...

# clean our working environment, but keep our functions, our dataframe, and our list of starting
# networks
rm(list = setdiff(ls(), c("df", "nets", lsf.str())))
gc()
# @RF: let's start not with all networks... but select for each parameter space row a few seeds
sub_nets <- Filter(function(x) x$seed %in% c(124), nets)
# simulation parameters
rounds = 10
z1 = 40 # weak influence
z2 = 60 # strong influence
# parallel backend
ncores <- detectCores() - 1
cl <- makeCluster(ncores)
registerDoParallel(cl)
# export custom functions and required objects to the worker processes
clusterExport(cl, c("fabm", "futility", "sub_nets", "rounds", "z1", "z2"))
# run loop in parallel; each task returns c(weak, strong) final-round adoption
# shares, rbind-ed into a two-column results matrix (rows aligned with sub_nets)
system.time({
results <- foreach(i = 1:length(sub_nets), .combine = "rbind", .packages = c("igraph", "tidyverse")) %dopar%
{
# simulation for weak influence scenario
sim_weak <- fabm(network = sub_nets[[i]]$network, rounds = rounds, params = list(s = 10,
e = 10, w = 30, z = z1))$evo
# share of agents following the trend in the final round
outcome_weak <- sim_weak$statistic[sim_weak$round == rounds & sim_weak$outcome == "follow_trend"]
# simulation for strong influence scenario
sim_strong <- fabm(network = sub_nets[[i]]$network, rounds = rounds, params = list(s = 10,
e = 10, w = 30, z = z2))$evo
outcome_strong <- sim_strong$statistic[sim_strong$round == rounds & sim_strong$outcome ==
"follow_trend"]
gc() # free worker memory between tasks
# return results as a row to be rbinded in a results matrix
return(c(outcome_weak, outcome_strong))
}
})
stopCluster(cl)
closeAllConnections()
# NOTE(review): `results` rows are aligned with sub_nets, not with df, so
# attaching them as df$outcome_weak / df$outcome_strong requires first
# matching on seed and the network parameters.
Copyright © Rob Franken